
In [376]:
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
In [377]:
# Load the test image as a single-channel grayscale array (uint8, 0-255).
img1 = cv.imread("a1images/emma.jpg",cv.IMREAD_GRAYSCALE)
# Piecewise-linear intensity transform built from three segments that
# together cover all 256 input levels (51 + 100 + 105 = 256):
#   inputs   0- 50 -> outputs   0- 50  (identity)
#   inputs  51-150 -> outputs 100-255  (stretched)
#   inputs 151-255 -> outputs 150-255  (compressed)
# NOTE(review): the mapping jumps from 255 back down to 150 at input 151 —
# presumably intentional (matching the assignment's transform figure), but
# worth confirming.
t1 = np.linspace(0, 50, 51, dtype=np.uint8)
t2 = np.linspace(100, 255, 150 - 51 + 1, dtype=np.uint8)
t3 = np.linspace(150, 255, 255 - 151 + 1, dtype=np.uint8)
# np.linspace(start, stop, num): `num` evenly spaced samples from `start`
# to `stop` inclusive (endpoint defaults to True).
In [378]:
# Stitch the three segments into a single 256-entry lookup table.
transform1 = np.concatenate((t1, t2, t3), axis=0).astype(np.uint8)
# cv.LUT requires exactly 256 entries for an 8-bit image; fail loudly if the
# segment lengths above are ever edited inconsistently.
assert transform1.size == 256, "LUT must have exactly 256 entries for cv.LUT"
img1_transformed = cv.LUT(img1,transform1)
In [379]:
# Visualize the intensity mapping: input level (x) vs. mapped output (y).
plt.plot(transform1)
plt.grid(True)
plt.xlabel("Index")
plt.ylabel("Value")
plt.title("Transform curve")
plt.show()
In [380]:
plt.figure(figsize=(10, 5))

# img1 was loaded with IMREAD_GRAYSCALE, so it is single-channel:
# cv.cvtColor(..., COLOR_BGR2RGB) asserts on 3/4-channel input and would
# raise here. Display the grayscale arrays directly with a gray colormap.
plt.subplot(1, 2, 1)
plt.title("Original Image")
plt.imshow(img1, cmap='gray', vmin=0, vmax=255)
plt.axis('off')

plt.subplot(1, 2, 2)
plt.title("Transformed Image")
plt.imshow(img1_transformed, cmap='gray', vmin=0, vmax=255)
plt.axis('off')
plt.show()

In [ ]:
# Load the brain slice; the default imread flag yields a 3-channel BGR image
# (even for a grayscale source), so the LUTs below are applied per channel.
img2 = cv.imread("a1images/brain_proton_density_slice.png")
In [382]:
# LUT accentuating white matter: suppress levels below 180 (x0.4),
# boost the rest (x1.3, saturating at 255).
white_vals = []
for level in range(256):
    if level < 180:
        white_vals.append(int(level * 0.4))
    else:
        white_vals.append(min(255, int(level * 1.3)))
lut_white = np.array(white_vals, dtype=np.uint8)

# LUT accentuating gray matter: boost the 80-160 band (x1.5, saturating),
# suppress everything outside it (x0.3).
gray_vals = []
for level in range(256):
    if 80 <= level <= 160:
        gray_vals.append(min(255, int(level * 1.5)))
    else:
        gray_vals.append(int(level * 0.3))
lut_gray = np.array(gray_vals, dtype=np.uint8)
In [383]:
# Apply each accentuation LUT to the brain slice (element-wise, per channel).
white_matter = cv.LUT(img2,lut_white)
gray_matter = cv.LUT(img2,lut_gray)
In [384]:
plt.figure(figsize=(10, 5))

# These panels plot the LUT *curves*, not images, so hiding the axes threw
# away the input/output scale. Show labelled axes with a grid instead.
plt.subplot(1, 2, 1)
plt.title("White Matter")
plt.plot(lut_white)
plt.xlabel("Input intensity")
plt.ylabel("Output intensity")
plt.grid(True)

plt.subplot(1, 2, 2)
plt.title("Gray Matter")
plt.plot(lut_gray)
plt.xlabel("Input intensity")
plt.ylabel("Output intensity")
plt.grid(True)

plt.show()
In [385]:
# Show the two accentuated versions side by side (BGR -> RGB for matplotlib).
plt.figure(figsize=(10, 5))
panels = [
    (white_matter, "White Matter"),
    (gray_matter, "Gray Matter"),
]
for pos, (image, label) in enumerate(panels, start=1):
    plt.subplot(1, 2, pos)
    plt.imshow(cv.cvtColor(image, cv.COLOR_BGR2RGB))
    plt.title(label)
    plt.axis('off')
plt.show()

In [386]:
# Load the highlights/shadows image (BGR) and preview it in RGB.
img3 = cv.imread("a1images/highlights_and_shadows.jpg")
img3_rgb = cv.cvtColor(img3, cv.COLOR_BGR2RGB)
plt.imshow(img3_rgb)
Out[386]:
<matplotlib.image.AxesImage at 0x15a834cdf60>
In [387]:
# Convert to CIELAB; for 8-bit images OpenCV scales the L channel to 0-255.
img_lab = cv.cvtColor(img3, cv.COLOR_BGR2LAB)
L, a, b = cv.split(img_lab)
# Gamma-correct only the lightness channel so chromaticity (a, b) is untouched.
L_norm = L / 255.0
gamma = 0.5
# gamma < 1 brightens the shadows; gamma > 1 would darken highlights instead.
L_gamma = np.power(L_norm, gamma)
L_corrected = np.uint8(L_gamma * 255)
img_lab_corrected = cv.merge([L_corrected, a, b])
img_corrected = cv.cvtColor(img_lab_corrected, cv.COLOR_LAB2BGR)
In [388]:
# Preview the gamma-corrected result (convert BGR -> RGB for matplotlib).
img3_corrected_rgb = cv.cvtColor(img_corrected,cv.COLOR_BGR2RGB)
plt.imshow(img3_corrected_rgb)
Out[388]:
<matplotlib.image.AxesImage at 0x15a830618d0>
In [389]:
# Compare the L-channel distribution before and after gamma correction.
plt.figure(figsize=(12,5))
histograms = [
    (L, 'Original L channel histogram'),
    (L_corrected, f'Gamma Corrected L channel histogram (gamma={gamma})'),
]
for pos, (channel, label) in enumerate(histograms, start=1):
    plt.subplot(1, 2, pos)
    plt.hist(channel.ravel(), bins=256, color='gray')
    plt.title(label)
    plt.xlabel('Intensity')
    plt.ylabel('Pixel count')
plt.show()

In [ ]:
# Load the spider image (3-channel BGR).
img4 = cv.imread("a1images/spider.png")
(stray Windows shell error output captured in the cell — safe to clear)
In [391]:
# Convert to HSV and split; the vibrance boost below acts on saturation only.
hsv = cv.cvtColor(img4,cv.COLOR_BGR2HSV)
h, s, v = cv.split(hsv)
In [392]:
# Vibrance transformation: add a Gaussian-shaped bump (centred at s = 128)
# to the saturation channel; `a` controls the strength of the boost.
a_values = [0.2, 0.5, 0.7, 0.9]
sigma = 70
x = s.astype("float32")

s_new_list = []
img_vibrance_list = []
for a in a_values:
    boost = a * 128 * np.exp(-((x - 128) ** 2) / (2 * sigma ** 2))
    s_new = np.minimum(x + boost, 255).astype("uint8")
    s_new_list.append(s_new)
    # Rebuild the HSV image with the boosted saturation and go back to BGR.
    img_vibrance = cv.cvtColor(cv.merge([h, s_new, v]), cv.COLOR_HSV2BGR)
    img_vibrance_list.append(img_vibrance)
In [393]:
# One panel per strength value.
plt.figure(figsize=(15, 5))
for pos, (a, vibrant) in enumerate(zip(a_values, img_vibrance_list), start=1):
    plt.subplot(1, len(a_values), pos)
    plt.imshow(cv.cvtColor(vibrant, cv.COLOR_BGR2RGB))
    plt.title(f'a = {a}')
    plt.axis('off')
plt.show()
In [394]:
# Plot the saturation transfer function for each strength, plus y = x.
x_vals = np.arange(0, 256, 1, dtype=np.float32)
plt.figure(figsize=(8, 5))
for a in a_values:
    curve = x_vals + a * 128 * np.exp(-((x_vals - 128) ** 2) / (2 * sigma ** 2))
    plt.plot(x_vals, np.clip(curve, 0, 255), label=f'a={a}')
plt.plot(x_vals, x_vals, 'k--', label='identity')  # reference: no change
plt.title("Transformation function curves")
plt.xlabel("Input saturation value")
plt.ylabel("Output saturation value")
plt.legend()
plt.grid(True)
plt.show()
In [395]:
# Side-by-side comparison for the chosen strength a = 0.7 (list index 2).
plt.figure(figsize=(10, 5))

plt.subplot(1, 2, 1)
plt.imshow(cv.cvtColor(img4, cv.COLOR_BGR2RGB))
plt.title("Original Image")
plt.axis('off')

plt.subplot(1, 2, 2)
plt.imshow(cv.cvtColor(img_vibrance_list[2], cv.COLOR_BGR2RGB))
plt.title("Vibrance Enhanced Image with a = 0.7")
plt.axis('off')

plt.show()

In [396]:
# Load the shells image for histogram equalization (BGR, 3 channels).
img5 = cv.imread("a1images/shells.tif")
In [397]:
def histogram_equalization(img):
    """Histogram-equalize an 8-bit image via the CDF mapping.

    Works on any uint8 array: all values are pooled into one 256-bin
    histogram, so for a colour image the channels are equalized jointly
    rather than independently.

    Args:
        img (np.ndarray): uint8 array of any shape.

    Returns:
        np.ndarray: equalized uint8 array with the same shape as `img`.
    """
    values = img.flatten()
    # 256-bin histogram of the pixel values.
    counts = np.bincount(values, minlength=256)
    # Probability density, then cumulative distribution of intensities.
    density = counts / values.size
    cumulative = np.cumsum(density)
    # Scale the CDF into [0, 255] to obtain the equalizing lookup table.
    lookup = np.floor(255 * cumulative).astype(np.uint8)
    # Remap every pixel through the lookup table and restore the shape.
    return lookup[values].reshape(img.shape)
In [398]:
equalized_img = histogram_equalization(img5)

# Compare image + histogram before and after equalization (2x2 grid).
plt.figure(figsize=(12, 8))

plt.subplot(2, 2, 1)
plt.imshow(cv.cvtColor(img5, cv.COLOR_BGR2RGB))
plt.title('Original Image')
plt.axis('off')

plt.subplot(2, 2, 3)
plt.hist(img5.ravel(), bins=256, range=[0, 255], color='black')
plt.title('Original Histogram')

plt.subplot(2, 2, 2)
plt.imshow(cv.cvtColor(equalized_img, cv.COLOR_BGR2RGB))
plt.title('Equalized Image')
plt.axis('off')

plt.subplot(2, 2, 4)
plt.hist(equalized_img.ravel(), bins=256, range=[0, 255], color='black')
plt.title('Equalized Histogram')

plt.tight_layout()
plt.show()

In [399]:
# Load the portrait and preview it (BGR -> RGB for matplotlib).
img6 = cv.imread("a1images/jeniffer.jpg")
plt.imshow(cv.cvtColor(img6,cv.COLOR_BGR2RGB))
Out[399]:
<matplotlib.image.AxesImage at 0x15ae2240610>
In [400]:
# Split into hue / saturation / value planes for the mask building below.
hsv = cv.cvtColor(img6, cv.COLOR_BGR2HSV)
h, s, v = cv.split(hsv)
In [401]:
# Show the three HSV planes as grayscale images.
plt.figure(figsize=(24, 6))
for pos, (channel, label) in enumerate(
        zip((h, s, v), ('Hue', 'Saturation', 'Value')), start=1):
    plt.subplot(1, 3, pos)
    plt.imshow(channel, cmap="gray")
    plt.title(label)
    plt.axis('off')
plt.show()
In [402]:
# Saturation thresholds separating the subject (saturated) from the
# near-grayscale background; tune saturation_min if the mask under/over-selects.
saturation_min = 15
saturation_max = 255

# Binary foreground mask from the saturation channel.
foreground_mask = cv.inRange(s, saturation_min, saturation_max)

# Morphological closing with a large elliptical kernel fills holes in the mask.
kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (85,85))
cleaned_mask = cv.morphologyEx(foreground_mask, cv.MORPH_CLOSE, kernel)

# Keep Value-channel pixels inside each mask, zero elsewhere.
foreground_value = cv.bitwise_and(v, v, mask=foreground_mask)
foreground_value_cleaned_mask = cv.bitwise_and(v, v, mask=cleaned_mask)

plt.figure(figsize=(20, 6))
panels = [
    (foreground_mask, 'foreground_mask'),
    (foreground_value, 'foreground_value'),
    (cleaned_mask, 'cleaned_foreground_mask'),
    (foreground_value_cleaned_mask, 'foreground_value_with_cleaned_mask'),
]
for pos, (image, label) in enumerate(panels, start=1):
    plt.subplot(1, 4, pos)
    plt.imshow(image, cmap="gray")
    plt.title(label)
    plt.axis('off')
# Fix: end with show() so the cell no longer echoes the bare axis-limits
# tuple returned by plt.axis('off') (see the old Out[402]).
plt.show()
Out[402]:
(np.float64(-0.5), np.float64(3839.5), np.float64(2159.5), np.float64(-0.5))
In [403]:
# Histogram restricted to genuine foreground pixels (mask > 0 only),
# so background zeros do not dominate the counts.
foreground_pixels = foreground_value_cleaned_mask[cleaned_mask > 0]
hist = np.bincount(foreground_pixels, minlength=256)

plt.figure()
plt.bar(np.arange(256), hist, color='black')
plt.xlim([0, 255])
plt.title('Foreground Histogram')
plt.show()
In [404]:
# (d) Cumulative histogram (CDF) of the foreground intensities.
cdf = np.cumsum(hist)

plt.plot(cdf)
plt.xlabel('Pixel Value')
plt.ylabel('Cumulative Frequency')
plt.title('Cumulative Histogram of Foreground')
plt.show()
In [405]:
# (e) Histogram equalization of the masked Value channel.
# NOTE(review): cv.equalizeHist sees the whole masked image, including the
# zero-valued background pixels. OpenCV maps the first occupied bin to 0, so
# the background stays black, but the many zeros still shape the mapping of
# foreground values — consider equalizing only the `cleaned_mask > 0` pixels.
equalized_value = cv.equalizeHist(foreground_value_cleaned_mask)
equalized_hsv = cv.merge([h,s,equalized_value])
plt.imshow(cv.cvtColor(equalized_hsv,cv.COLOR_HSV2RGB))
Out[405]:
<matplotlib.image.AxesImage at 0x15a8c698310>
In [406]:
# Invert the cleaned mask to select the background, keep it unmodified,
# and add the equalized foreground back on top (background pixels of the
# equalized image are black, so cv.add recombines without overlap).
background_mask = cv.bitwise_not(cleaned_mask)
extracted_background = cv.bitwise_and(img6, img6, mask=background_mask)
result = cv.add(extracted_background, cv.cvtColor(equalized_hsv, cv.COLOR_HSV2BGR))

plt.figure(figsize=(24, 6))
panels = [
    (img6, 'Original Image'),
    (extracted_background, 'Extracted Background'),
    (result, 'Background + Histogram Equalized Foreground'),
]
for pos, (image, label) in enumerate(panels, start=1):
    plt.subplot(1, 3, pos)
    plt.imshow(cv.cvtColor(image, cv.COLOR_BGR2RGB))
    plt.title(label)
    plt.axis('off')
plt.tight_layout()
plt.show()

In [407]:
# Load grayscale image
img7 = cv.imread('a1images/einstein.png', cv.IMREAD_GRAYSCALE)

# Sobel responses along x and y; CV_64F keeps the negative gradient values.
sobel_x = cv.Sobel(img7, cv.CV_64F, 1, 0, ksize=3) # Gx
sobel_y = cv.Sobel(img7, cv.CV_64F, 0, 1, ksize=3) # Gy

# Gradient magnitude, clipped back into displayable 8-bit range.
magnitude = np.sqrt(sobel_x**2 + sobel_y**2)
magnitude = np.uint8(np.clip(magnitude, 0, 255))

plt.figure(figsize=(20,6))
plt.subplot(1,4,1)
# img7 is single-channel: cv.cvtColor(..., COLOR_BGR2RGB) would raise on it,
# so display the grayscale array directly.
plt.imshow(img7, cmap='gray')
plt.title('Original Image')
plt.axis('off')
plt.subplot(1,4,2)
plt.imshow(np.abs(sobel_x), cmap='gray')
plt.title('Sobel Horizontal (Gx)')
plt.axis('off')
plt.subplot(1,4,3)
plt.imshow(np.abs(sobel_y), cmap='gray')
plt.title('Sobel Vertical (Gy)')
plt.axis('off')
plt.subplot(1,4,4)
plt.imshow(magnitude, cmap='gray')
plt.title('Gradient Magnitude')
plt.axis('off')
plt.show()
In [408]:
def convolve2d(image, kernel):
    """Filter a 2D image with a 2D kernel using zero padding.

    Note: the kernel is not flipped, so strictly this computes
    cross-correlation; for the Sobel kernels used here the only
    difference is the sign, which is discarded by abs()/magnitude.

    Args:
        image: 2D array.
        kernel: 2D array with odd dimensions.

    Returns:
        float64 array with the same shape as `image`.
    """
    k_rows, k_cols = kernel.shape
    row_pad, col_pad = k_rows // 2, k_cols // 2
    # Zero-pad so the output keeps the input's shape.
    padded = np.pad(image, ((row_pad, row_pad), (col_pad, col_pad)),
                    mode='constant', constant_values=0)
    result = np.zeros(image.shape, dtype=np.float64)
    rows, cols = image.shape[0], image.shape[1]
    for r in range(rows):
        for c in range(cols):
            # Window of the padded image aligned with output pixel (r, c).
            window = padded[r:r + k_rows, c:c + k_cols]
            result[r, c] = np.sum(window * kernel)
    return result
# Sobel kernels (matching cv.Sobel with ksize=3).
sobel_x = np.array([[1, 0, -1],
                    [2, 0, -2],
                    [1, 0, -1]], dtype=np.float64)
sobel_y = np.array([[1, 2, 1],
                    [0, 0, 0],
                    [-1, -2, -1]], dtype=np.float64)

# Apply Sobel filter manually with the hand-written convolution.
Gx = convolve2d(img7, sobel_x)
Gy = convolve2d(img7, sobel_y)

# Gradient magnitude, clipped into displayable 8-bit range.
magnitude = np.sqrt(Gx**2 + Gy**2)
magnitude = np.clip(magnitude, 0, 255).astype(np.uint8)

plt.figure(figsize=(20,6))
plt.subplot(1,4,1)
# img7 is grayscale (single channel): COLOR_BGR2RGB would raise here,
# so show it with a gray colormap instead.
plt.imshow(img7, cmap='gray')
plt.title('Original Image')
plt.axis('off')
plt.subplot(1,4,2)
plt.imshow(np.abs(Gx), cmap='gray')
plt.title('Sobel Horizontal (Gx)')
plt.axis('off')
plt.subplot(1,4,3)
plt.imshow(np.abs(Gy), cmap='gray')
plt.title('Sobel Vertical (Gy)')
plt.axis('off')
plt.subplot(1,4,4)
plt.imshow(magnitude, cmap='gray')
plt.title('Gradient Magnitude')
plt.axis('off')
plt.show()
In [409]:
def convolve1d(image, kernel, axis):
    """Correlate a 2D image with a 1D kernel along one axis (zero padding).

    Args:
        image: 2D array.
        kernel: 1D array of odd length.
        axis: 0 for vertical (down a column), 1 for horizontal (along a row).

    Returns:
        float64 array with the same shape as `image`.

    Raises:
        ValueError: if `axis` is neither 0 nor 1.
    """
    if axis not in (0, 1):
        raise ValueError("Axis must be 0 (vertical) or 1 (horizontal)")
    taps = kernel.shape[0]
    margin = taps // 2
    # Pad both dimensions; the unused dimension is compensated by `margin`
    # in the indexing below.
    padded = np.pad(image, ((margin, margin), (margin, margin)),
                    mode='constant', constant_values=0)
    out = np.zeros(image.shape, dtype=np.float64)
    for r in range(image.shape[0]):
        for c in range(image.shape[1]):
            if axis == 0:
                segment = padded[r:r + taps, c + margin]
            else:
                segment = padded[r + margin, c:c + taps]
            out[r, c] = np.sum(segment * kernel)
    return out
# Separable Sobel: sobel_x factors as [1,2,1]^T (smoothing) x [1,0,-1] (derivative).
k_vert = np.array([1, 2, 1], dtype=np.float64) # vertical kernel
k_horz = np.array([1, 0, -1], dtype=np.float64) # horizontal kernel

# First convolve vertically, then horizontally.
temp = convolve1d(img7, k_vert, axis=0)
sobel_separable = convolve1d(temp, k_horz, axis=1)

# Take absolute value and clip for display
sobel_separable_abs = np.clip(np.abs(sobel_separable), 0, 255).astype(np.uint8)

plt.figure(figsize=(10,6))
plt.subplot(1,2,1)
# img7 is grayscale (single channel): COLOR_BGR2RGB would raise on it.
plt.imshow(img7, cmap='gray')
plt.title('Original Image')
plt.axis('off')
plt.subplot(1,2,2)
# Bug fix: this previously displayed `magnitude` from the earlier cell
# instead of the separable-filter result computed above.
plt.imshow(sobel_separable_abs, cmap='gray')
plt.title('Sobel Filter using separable kernels')
plt.axis('off')
plt.show()

In [410]:
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
def zoom_image(img, zoom_factor, method='nearest'):
    """Scale an image by `zoom_factor` (pure-Python reference implementation).

    Args:
        img: 2D (grayscale) or 3D (colour) array.
        zoom_factor: scale factor in (0, 10].
        method: 'nearest' for nearest-neighbour, 'bilinear' for bilinear
            interpolation.

    Returns:
        Resized array with the same dtype as `img`.

    Raises:
        ValueError: for an out-of-range zoom factor or an unknown method.
    """
    if not (0 < zoom_factor <= 10):
        raise ValueError("Zoom factor must be in (0, 10].")
    rows, cols = img.shape[:2]
    out_rows, out_cols = int(rows * zoom_factor), int(cols * zoom_factor)
    if img.ndim == 3:
        out = np.zeros((out_rows, out_cols, *img.shape[2:]), dtype=img.dtype)
    else:
        out = np.zeros((out_rows, out_cols), dtype=img.dtype)
    for r in range(out_rows):
        for c in range(out_cols):
            # Back-project the output pixel into source coordinates.
            src_r = r / zoom_factor
            src_c = c / zoom_factor
            if method == 'nearest':
                rr = min(int(round(src_r)), rows - 1)
                cc = min(int(round(src_c)), cols - 1)
                out[r, c] = img[rr, cc]
            elif method == 'bilinear':
                r0 = int(np.floor(src_r))
                r1 = min(r0 + 1, rows - 1)
                c0 = int(np.floor(src_c))
                c1 = min(c0 + 1, cols - 1)
                fr = src_r - r0
                fc = src_c - c0
                # Interpolate along columns on the two bracketing rows,
                # then between the rows.
                top = img[r0, c0] * (1 - fc) + img[r0, c1] * fc
                bottom = img[r1, c0] * (1 - fc) + img[r1, c1] * fc
                out[r, c] = top * (1 - fr) + bottom * fr
            else:
                raise ValueError("Unsupported method. Use either 'nearest' or 'bilinear'.")
    return out.astype(img.dtype)
def normalized_ssd(img1, img2):
    """Sum of squared differences between two equal-shape images,
    normalised by the energy (sum of squares) of the first image."""
    reference = img1.astype(np.float64)
    candidate = img2.astype(np.float64)
    diff = reference - candidate
    return np.sum(diff ** 2) / np.sum(reference ** 2)
# Image pairs: (full-resolution original, downsampled version).
image_pairs = [
    ("a1images/a1q5images/im01.png", "a1images/a1q5images/im01small.png"),
    ("a1images/a1q5images/im02.png", "a1images/a1q5images/im02small.png"),
    ("a1images/a1q5images/im03.png", "a1images/a1q5images/im03small.png"),
    ("a1images/a1q5images/taylor.jpg", "a1images/a1q5images/taylor_small.jpg")
]
zoom_factor = 4

plt.figure(figsize=(15, 20))
for idx, (full_path, small_path) in enumerate(image_pairs, start=1):
    img_full = cv.imread(full_path)
    img_small = cv.imread(small_path)
    if img_full is None or img_small is None:
        raise FileNotFoundError(f"Could not load images for set {idx}")

    # Upscale the small image with both interpolation methods.
    zoomed_nearest = zoom_image(img_small, zoom_factor, method='nearest')
    zoomed_bilinear = zoom_image(img_small, zoom_factor, method='bilinear')

    # Resize the original so it matches the zoomed images pixel-for-pixel.
    img_full_resized = cv.resize(
        img_full,
        (zoomed_nearest.shape[1], zoomed_nearest.shape[0]),
        interpolation=cv.INTER_AREA,
    )

    ssd_nearest = normalized_ssd(img_full_resized, zoomed_nearest)
    ssd_bilinear = normalized_ssd(img_full_resized, zoomed_bilinear)
    print(f"Set {idx}:")
    print(f" Normalized SSD (Nearest): {ssd_nearest:.6f}")
    print(f" Normalized SSD (Bilinear): {ssd_bilinear:.6f}")
    print("-" * 40)

    # One row of three panels per image set.
    row_panels = [
        (img_full, f'Set {idx}: Original'),
        (zoomed_nearest, f'Nearest x{zoom_factor}'),
        (zoomed_bilinear, f'Bilinear x{zoom_factor}'),
    ]
    for offset, (panel, label) in enumerate(row_panels):
        plt.subplot(len(image_pairs), 3, (idx - 1) * 3 + 1 + offset)
        plt.imshow(cv.cvtColor(panel, cv.COLOR_BGR2RGB))
        plt.title(label)
        plt.axis('off')
plt.tight_layout()
plt.show()
Set 1: Normalized SSD (Nearest): 0.022590 Normalized SSD (Bilinear): 0.017719 ---------------------------------------- Set 2: Normalized SSD (Nearest): 0.010240 Normalized SSD (Bilinear): 0.007757 ---------------------------------------- Set 3: Normalized SSD (Nearest): 0.016428 Normalized SSD (Bilinear): 0.012518 ---------------------------------------- Set 4: Normalized SSD (Nearest): 0.005395 Normalized SSD (Bilinear): 0.004471 ----------------------------------------

In [411]:
# Load the daisy image for GrabCut segmentation (BGR).
img9 = cv.imread("a1images/daisy.jpg")
In [412]:
# Tune the GrabCut initialisation rectangle by drawing candidates on a copy.
img_copy = img9.copy()
rect = (45, 130, img9.shape[1] - 50, img9.shape[0] - 200)  # (x, y, w, h)

top_left = (rect[0], rect[1])
bottom_right = (rect[0] + rect[2], rect[1] + rect[3])
cv.rectangle(img_copy, top_left, bottom_right, (0, 255, 0), 2)

plt.imshow(cv.cvtColor(img_copy, cv.COLOR_BGR2RGB))
plt.axis('off')
plt.show()
In [413]:
# GrabCut needs a label mask plus two 1x65 float64 arrays holding its
# internal GMM state for background and foreground.
mask = np.zeros(img9.shape[:2], np.uint8)
bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)
# Rectangle around the flower (x, y, width, height) — same as tuned above.
rect = (45, 130, img9.shape[1]-50, img9.shape[0]-200)
# Run GrabCut for 5 iterations, initialised from the rectangle.
cv.grabCut(img9, mask, rect, bgdModel, fgdModel, 5, cv.GC_INIT_WITH_RECT)
# Mask labels: 0 = background, 1 = foreground, 2 = probable background,
# 3 = probable foreground. Keep definite + probable foreground (1 and 3).
mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
# Foreground and background extraction by broadcasting the binary mask
# across the colour channels.
foreground = img9 * mask2[:, :, np.newaxis]
background = img9 * (1 - mask2[:, :, np.newaxis])
# Show results
plt.figure(figsize=(15, 6))
plt.subplot(1, 3, 1)
plt.title("Segmentation Mask")
plt.imshow(mask2, cmap='gray')
plt.axis('off')
plt.subplot(1, 3, 2)
plt.title("Foreground")
plt.imshow(cv.cvtColor(foreground, cv.COLOR_BGR2RGB))
plt.axis('off')
plt.subplot(1, 3, 3)
plt.title("Background")
plt.imshow(cv.cvtColor(background, cv.COLOR_BGR2RGB))
plt.axis('off')
plt.show()
In [414]:
# Blur the whole image (19x19 Gaussian; sigma=0 lets OpenCV derive it
# from the kernel size).
blurred = cv.GaussianBlur(img9, (19, 19), 0)
# Start from the blurred frame and paste the sharp pixels back wherever
# the GrabCut mask marks foreground.
enhanced = blurred.copy()
enhanced[mask2 == 1] = img9[mask2 == 1]
# Show original and enhanced
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.title("Original")
plt.imshow(cv.cvtColor(img9, cv.COLOR_BGR2RGB))
plt.axis('off')
plt.subplot(1, 2, 2)
plt.title("Enhanced with Blurred Background")
plt.imshow(cv.cvtColor(enhanced, cv.COLOR_BGR2RGB))
plt.axis('off')
plt.show()
(c) Why is the background just beyond the edge of the flower dark? That dark halo happens because:
GrabCut doesn’t perfectly segment fine boundaries like flower edges.
Some background pixels close to the edge are marked as probable foreground but have darker color than the main flower.
When we combine the blurred background with the sharp foreground, those edge pixels mix with the blurred/dark background, resulting in a dark outline.